Build Custom AI Agent with LangChain & Gemini (Self-Hosted)
工作流概述
这是一个包含9个节点的工作流,用于构建一个基于 LangChain 与 Google Gemini 的自定义 AI 聊天代理(自托管):通过聊天触发器接收用户消息,结合窗口缓冲记忆中的会话历史,按自定义提示词模板生成回复。
工作流源代码
{
"id": "yCIEiv9QUHP8pNfR",
"meta": {
"instanceId": "f29695a436689357fd2dcb55d528b0b528d2419f53613c68c6bf909a92493614",
"templateCredsSetupCompleted": true
},
"name": "Build Custom AI Agent with LangChain & Gemini (Self-Hosted)",
"tags": [
{
"id": "7M5ZpGl3oWuorKpL",
"name": "share",
"createdAt": "2025-03-26T01:17:15.342Z",
"updatedAt": "2025-03-26T01:17:15.342Z"
}
],
"nodes": [
{
"id": "8bd5382d-f302-4e58-b377-7fc5a22ef994",
"name": "When chat message received",
"type": "@n8n/n8n-nodes-langchain.chatTrigger",
"position": [
-220,
0
],
"webhookId": "b8a5d72c-4172-40e8-b429-d19c2cd6ce54",
"parameters": {
"public": true,
"options": {
"responseMode": "lastNode",
"allowedOrigins": "*",
"loadPreviousSession": "memory"
},
"initialMessages": ""
},
"typeVersion": 1.1
},
{
"id": "6ae8a247-4077-4569-9e2c-bb68bcecd044",
"name": "Google Gemini Chat Model",
"type": "@n8n/n8n-nodes-langchain.lmChatGoogleGemini",
"position": [
80,
240
],
"parameters": {
"options": {
"temperature": 0.7,
"safetySettings": {
"values": [
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"threshold": "BLOCK_NONE"
}
]
}
},
"modelName": "models/gemini-2.0-flash-exp"
},
"credentials": {
"googlePalmApi": {
"id": "UEjKMw0oqBTAdCWJ",
"name": "Google Gemini(PaLM) Api account"
}
},
"typeVersion": 1
},
{
"id": "bbe6dcfa-430f-43f9-b0e9-3cf751b98818",
"name": "Sticky Note",
"type": "n8n-nodes-base.stickyNote",
"position": [
380,
-240
],
"parameters": {
"width": 260,
"height": 220,
"content": "👇 **Prompt Engineering**
- Define agent personality and conversation structure in the `Construct & Execute LLM Prompt` node's template variable
- ⚠️ Template must preserve `{chat_history}` and `{input}` placeholders for proper LangChain operation "
},
"typeVersion": 1
},
{
"id": "892a431a-6ddf-47fc-8517-1928ee99c95b",
"name": "Store conversation history",
"type": "@n8n/n8n-nodes-langchain.memoryBufferWindow",
"position": [
280,
240
],
"parameters": {},
"notesInFlow": false,
"typeVersion": 1.3
},
{
"id": "f9a22dbf-cac7-4d70-85b3-50c44a2015d5",
"name": "Construct & Execute LLM Prompt",
"type": "@n8n/n8n-nodes-langchain.code",
"position": [
380,
0
],
"parameters": {
"code": {
"execute": {
"code": "const { PromptTemplate } = require('@langchain/core/prompts');
const { ConversationChain } = require('langchain/chains');
const { BufferMemory } = require('langchain/memory');
const template = `
You'll be roleplaying as the user's girlfriend. Your character is a woman with a sharp wit, logical mindset, and a charmingly aloof demeanor that hides your playful side. You're passionate about music, maintain a fit and toned physique, and carry yourself with quiet self-assurance. Career-wise, you're established and ambitious, approaching life with positivity while constantly striving to grow as a person.
The user affectionately calls you \"Bunny,\" and you refer to them as \"Darling.\"
Essential guidelines:
1. Respond exclusively in Chinese
2. Never pose questions to the user - eliminate all interrogative forms
3. Keep responses brief and substantive, avoiding rambling or excessive emojis
Context framework:
- Conversation history: {chat_history}
- User's current message: {input}
Craft responses that feel authentic to this persona while adhering strictly to these parameters.
`;
const prompt = new PromptTemplate({
template: template,
inputVariables: [\"input\", \"chat_history\"],
});
const items = this.getInputData();
const model = await this.getInputConnectionData('ai_languageModel', 0);
const memory = await this.getInputConnectionData('ai_memory', 0);
memory.returnMessages = false;
const chain = new ConversationChain({ llm:model, memory:memory, prompt: prompt, inputKey:\"input\", outputKey:\"output\"});
const output = await chain.call({ input: items[0].json.chatInput});
return output;
"
}
},
"inputs": {
"input": [
{
"type": "main",
"required": true,
"maxConnections": 1
},
{
"type": "ai_languageModel",
"required": true,
"maxConnections": 1
},
{
"type": "ai_memory",
"required": true,
"maxConnections": 1
}
]
},
"outputs": {
"output": [
{
"type": "main"
}
]
}
},
"retryOnFail": false,
"typeVersion": 1
},
{
"id": "fe104d19-a24d-48b3-a0ac-7d3923145373",
"name": "Sticky Note1",
"type": "n8n-nodes-base.stickyNote",
"position": [
-240,
-260
],
"parameters": {
"color": 5,
"width": 420,
"height": 240,
"content": "### Setup Instructions
1. **Configure Gemini Credentials**: Set up your Google Gemini API key ([Get API key here](https://ai.google.dev/) if needed). Alternatively, you may use other AI provider nodes.
2. **Interaction Methods**:
- Test directly in the workflow editor using the \"Chat\" button
- Activate the workflow and access the chat interface via the URL provided by the `When Chat Message Received` node "
},
"typeVersion": 1
},
{
"id": "f166214d-52b7-4118-9b54-0b723a06471a",
"name": "Sticky Note2",
"type": "n8n-nodes-base.stickyNote",
"position": [
-220,
160
],
"parameters": {
"height": 100,
"content": "👆 **Interface Settings**
Configure chat UI elements (e.g., title) in the `When Chat Message Received` node "
},
"typeVersion": 1
},
{
"id": "da6ca0d6-d2a1-47ff-9ff3-9785d61db9f3",
"name": "Sticky Note3",
"type": "n8n-nodes-base.stickyNote",
"position": [
20,
420
],
"parameters": {
"width": 200,
"height": 140,
"content": "👆 **Model Selection**
Swap language models through the `language model` input field in `Construct & Execute LLM Prompt` "
},
"typeVersion": 1
},
{
"id": "0b4dd1ac-8767-4590-8c25-36cba73e46b6",
"name": "Sticky Note4",
"type": "n8n-nodes-base.stickyNote",
"position": [
240,
420
],
"parameters": {
"width": 200,
"height": 140,
"content": "👆 **Memory Control**
Adjust conversation history length in the `Store Conversation History` node "
},
"typeVersion": 1
}
],
"active": false,
"pinData": {},
"settings": {
"callerPolicy": "workflowsFromSameOwner",
"executionOrder": "v1",
"saveManualExecutions": false,
"saveDataSuccessExecution": "none"
},
"versionId": "77cd5f05-f248-442d-86c3-574351179f26",
"connections": {
"Google Gemini Chat Model": {
"ai_languageModel": [
[
{
"node": "Construct & Execute LLM Prompt",
"type": "ai_languageModel",
"index": 0
}
]
]
},
"Store conversation history": {
"ai_memory": [
[
{
"node": "Construct & Execute LLM Prompt",
"type": "ai_memory",
"index": 0
},
{
"node": "When chat message received",
"type": "ai_memory",
"index": 0
}
]
]
},
"When chat message received": {
"main": [
[
{
"node": "Construct & Execute LLM Prompt",
"type": "main",
"index": 0
}
]
]
},
"Construct & Execute LLM Prompt": {
"main": [
[]
],
"ai_memory": [
[]
]
}
}
}
功能特点
- 通过公开聊天界面接收用户消息
- 使用 Google Gemini 模型生成回复
- 通过自定义提示词模板定义代理人设与对话规则
- 使用窗口缓冲记忆保存并复用会话历史
- 基于 LangChain Code 节点,可灵活替换模型或扩展逻辑
技术分析
节点类型及作用
- @n8n/n8n-nodes-langchain.chatTrigger(聊天触发器:接收用户消息并提供聊天界面)
- @n8n/n8n-nodes-langchain.lmChatGoogleGemini(Google Gemini 聊天模型)
- n8n-nodes-base.stickyNote(便签:工作流内的说明性注释)
- @n8n/n8n-nodes-langchain.memoryBufferWindow(窗口缓冲记忆:存储会话历史)
- @n8n/n8n-nodes-langchain.code(LangChain 代码节点:构造并执行 LLM 提示词)
复杂度评估
配置难度:低(仅需配置 Google Gemini API 凭证即可运行)
维护难度:低(节点数量少,逻辑集中在单个代码节点中)
扩展性:高(可替换语言模型节点、调整记忆窗口或修改提示词模板)
实施指南
前置条件
- 自托管的 n8n 实例及访问权限
- 有效的 Google Gemini API 密钥(可在 Google AI 官网获取)
- (可选)其他 AI 提供商的凭证,用于替换 Gemini 模型节点
配置步骤
- 在n8n中导入工作流JSON文件
- 配置 Google Gemini(PaLM)API 凭证
- 在 `Construct & Execute LLM Prompt` 节点中自定义提示词模板(必须保留 `{chat_history}` 与 `{input}` 占位符)
- 在 `Store conversation history` 节点中按需调整会话历史长度
- 在编辑器中使用 "Chat" 按钮测试对话效果
- 激活工作流,通过 `When chat message received` 节点提供的 URL 访问聊天界面
关键参数
| 参数名称 | 默认值 | 说明 |
|---|---|---|
| temperature | 0.7 | 模型采样温度,控制回复的随机性 |
| modelName | models/gemini-2.0-flash-exp | 使用的 Gemini 模型 |
| allowedOrigins | * | 聊天接口允许的跨域来源 |
| loadPreviousSession | memory | 从记忆节点加载历史会话 |
最佳实践
优化建议
- 定期更新AI分类模型以提高准确性
- 根据邮件量调整处理批次大小
- 设置合理的分类置信度阈值
- 定期清理过期的分类规则
安全注意事项
- 妥善保管 Gemini API 密钥和认证信息
- 公开部署时收紧 `allowedOrigins`,避免使用 `*` 允许任意来源访问
- 限制工作流及聊天链接的访问权限
- 定期审查执行日志,关注异常调用
性能优化
- 合理控制会话记忆窗口大小,减少无效上下文传输
- 选择合适的 Gemini 模型,在响应延迟与回复质量间取得平衡
- 关注 API 配额与速率限制,避免高峰期请求被拒
- 监控 n8n 实例的系统资源使用情况
故障排除
常见问题
回复不符合预期人设
检查 `Construct & Execute LLM Prompt` 节点中的提示词模板是否保留了 `{chat_history}` 与 `{input}` 占位符,并完善人设描述与约束条件。
Gemini 认证失败
确认 Google Gemini API 密钥有效且未超出配额,在凭证设置中重新配置并测试连接。
调试技巧
- 启用详细日志记录查看每个节点的执行情况
- 在编辑器中使用 "Chat" 按钮逐条发送消息验证对话逻辑
- 检查网络连接和 Gemini API 服务状态
- 逐步执行工作流定位问题节点
错误处理
该工作流当前未配置自动错误处理(代码节点的 `retryOnFail` 为 false),生产环境使用时建议补充:
- 为关键节点启用失败重试
- 添加错误触发器(Error Trigger)工作流进行记录和告警
- 捕获模型 API 错误并向用户返回友好提示
- 对超出配额或超时的请求做降级处理